import keras
keras.__version__
# (notebook output) Keras version used for this run:
'2.14.0'
from keras.models import load_model
# Load the previously trained CNN (4 conv/pool pairs + 2 dense layers,
# per the summary below) from its checkpoint file.
model = load_model('best_model.h5')
model.summary() # As a reminder.
WARNING:absl:At this time, the v2.11+ optimizer `tf.keras.optimizers.RMSprop` runs slowly on M1/M2 Macs, please use the legacy Keras optimizer instead, located at `tf.keras.optimizers.legacy.RMSprop`.
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 148, 148, 32) 896
max_pooling2d (MaxPooling2 (None, 74, 74, 32) 0
D)
conv2d_1 (Conv2D) (None, 72, 72, 64) 18496
max_pooling2d_1 (MaxPoolin (None, 36, 36, 64) 0
g2D)
conv2d_2 (Conv2D) (None, 34, 34, 128) 73856
max_pooling2d_2 (MaxPoolin (None, 17, 17, 128) 0
g2D)
conv2d_3 (Conv2D) (None, 15, 15, 128) 147584
max_pooling2d_3 (MaxPoolin (None, 7, 7, 128) 0
g2D)
flatten (Flatten) (None, 6272) 0
dense (Dense) (None, 512) 3211776
dense_1 (Dense) (None, 1) 513
=================================================================
Total params: 3453121 (13.17 MB)
Trainable params: 3453121 (13.17 MB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
# Path of the sample image we want to visualize activations for.
img_path = '/Users/shiveshrajsahu/Desktop/Cs767/cs767A1_SAHU_SHIVESH_HW05/Dragon.jpeg'

# Convert the image into the 4D float tensor the network expects.
from keras.preprocessing import image
from tensorflow.keras import utils
import numpy as np

img = utils.load_img(img_path, target_size=(150, 150))
img_tensor = utils.img_to_array(img)
img_tensor = img_tensor[np.newaxis, ...]  # add batch axis -> (1, 150, 150, 3)
# The model was trained on inputs scaled into [0, 1], so match that here.
img_tensor /= 255.
print(img_tensor.shape)
(1, 150, 150, 3)
import matplotlib.pyplot as plt
# Sanity check: display the preprocessed image (drop the batch axis).
plt.imshow(img_tensor[0])
plt.show()
from keras import models
# Extracts the outputs of the first 8 layers (the 4 conv + 4 pooling layers):
layer_outputs = [layer.output for layer in model.layers[:8]]
# Creates a model that will return these outputs, given the model input:
activation_model = models.Model(inputs=model.input, outputs=layer_outputs)
# predict() on this multi-output model returns a list of 8 Numpy arrays:
# one array per extracted layer activation
activations = activation_model.predict(img_tensor)
1/1 [==============================] - 0s 35ms/step
# Activation of the first conv layer; printed shape below is (1, 148, 148, 32).
first_layer_activation = activations[0]
print(first_layer_activation.shape)
(1, 148, 148, 32)
import matplotlib.pyplot as plt

# Display two individual channels of the first conv layer's activation.
for channel in (2, 28):
    plt.matshow(first_layer_activation[0, :, :, channel], cmap='viridis')
    plt.show()
import keras

# Record the names of the extracted layers so each plot can be titled.
layer_names = []
for layer in model.layers[:8]:
    layer_names.append(layer.name)

images_per_row = 16

# Now let's display our feature maps, one tiled grid per layer.
for layer_name, layer_activation in zip(layer_names, activations):
    # The feature map has shape (1, size, size, n_features).
    n_features = layer_activation.shape[-1]
    size = layer_activation.shape[1]

    # Tile the activation channels into a grid of
    # n_cols rows x images_per_row columns.
    n_cols = n_features // images_per_row
    display_grid = np.zeros((size * n_cols, images_per_row * size))

    for col in range(n_cols):
        for row in range(images_per_row):
            # .copy() because indexing a NumPy array yields a VIEW: without
            # it, the in-place normalization below silently mutates
            # `activations` itself.
            channel_image = layer_activation[0, :, :,
                                             col * images_per_row + row].copy()
            # Post-process the feature to make it visually palatable.
            # The small epsilon prevents the divide-by-zero RuntimeWarning
            # seen for all-constant (zero-std) channels.
            channel_image -= channel_image.mean()
            channel_image /= (channel_image.std() + 1e-5)
            channel_image *= 64
            channel_image += 128
            channel_image = np.clip(channel_image, 0, 255).astype('uint8')
            display_grid[col * size : (col + 1) * size,
                         row * size : (row + 1) * size] = channel_image

    # Display the grid at roughly one figure-inch per `size` pixels.
    scale = 1. / size
    plt.figure(figsize=(scale * display_grid.shape[1],
                        scale * display_grid.shape[0]))
    plt.title(layer_name)
    plt.grid(False)
    plt.imshow(display_grid, aspect='auto', cmap='viridis')
    plt.show()
/var/folders/hj/z0251grs2p563x5blq98v7p40000gn/T/ipykernel_73806/2282455063.py:30: RuntimeWarning: invalid value encountered in divide channel_image /= channel_image.std()
from tensorflow.keras.applications import VGG16
from tensorflow.keras import backend as K
# NOTE(review): this REBINDS `model`, clobbering the sequential CNN loaded
# above — any later cell that filters `model.layers` by the old layer names
# (conv2d_1, ...) will match nothing. Consider a distinct name, e.g. `vgg`.
model = VGG16(weights='imagenet',
include_top=False)
# Target layer/filter for the filter-visualization recipe.
layer_name = 'block3_conv1'
filter_index = 0
layer_output = model.get_layer(layer_name).output
# NOTE(review): under TF2 eager execution, K.mean on a symbolic layer output
# produces a KerasTensor, not a value usable for TF1-style gradient ascent
# (K.gradients). Confirm this is still the intended workflow for Keras 2.14.
loss = K.mean(layer_output[:, :, :, filter_index])
model.summary()
Model: "vgg16"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_4 (InputLayer) [(None, None, None, 3)] 0
block1_conv1 (Conv2D) (None, None, None, 64) 1792
block1_conv2 (Conv2D) (None, None, None, 64) 36928
block1_pool (MaxPooling2D) (None, None, None, 64) 0
block2_conv1 (Conv2D) (None, None, None, 128) 73856
block2_conv2 (Conv2D) (None, None, None, 128) 147584
block2_pool (MaxPooling2D) (None, None, None, 128) 0
block3_conv1 (Conv2D) (None, None, None, 256) 295168
block3_conv2 (Conv2D) (None, None, None, 256) 590080
block3_conv3 (Conv2D) (None, None, None, 256) 590080
block3_pool (MaxPooling2D) (None, None, None, 256) 0
block4_conv1 (Conv2D) (None, None, None, 512) 1180160
block4_conv2 (Conv2D) (None, None, None, 512) 2359808
block4_conv3 (Conv2D) (None, None, None, 512) 2359808
block4_pool (MaxPooling2D) (None, None, None, 512) 0
block5_conv1 (Conv2D) (None, None, None, 512) 2359808
block5_conv2 (Conv2D) (None, None, None, 512) 2359808
block5_conv3 (Conv2D) (None, None, None, 512) 2359808
block5_pool (MaxPooling2D) (None, None, None, 512) 0
=================================================================
Total params: 14714688 (56.13 MB)
Trainable params: 14714688 (56.13 MB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
# Identify the layers:
# From the FIRST model's summary, the first eight layers are:
#   conv2d, max_pooling2d, conv2d_1, max_pooling2d_1,
#   conv2d_2, max_pooling2d_2, conv2d_3, max_pooling2d_3
#
# Extract activations for the three layers of interest only.
layers_of_interest = ['conv2d_1', 'conv2d_2', 'max_pooling2d_1']

# BUG FIX: at this point `model` has been rebound to VGG16, whose layers are
# named block1_conv1, ..., so filtering `model.layers` by the names above
# matched nothing (empty layer_outputs / layer_names). Additionally, zipping
# 3 names against all 8 entries of `activations` would pair names with the
# wrong activations. Instead, reuse `layer_names` — recorded earlier from the
# first model's first 8 layers, in the same order as `activations` — and
# filter names and activations TOGETHER so the visualization loop below
# zips matching pairs.
selected = [(name, act)
            for name, act in zip(layer_names, activations)
            if name in layers_of_interest]
layer_names = [name for name, _ in selected]
activations = [act for _, act in selected]
# Visualization of the selected feature maps (same tiling scheme as above).
images_per_row = 16

for layer_name, layer_activation in zip(layer_names, activations):
    print(f"Visualizing activations of layer: {layer_name}")  # Print the layer name
    # The feature map has shape (1, size, size, n_features).
    n_features = layer_activation.shape[-1]
    size = layer_activation.shape[1]
    n_cols = n_features // images_per_row
    display_grid = np.zeros((size * n_cols, images_per_row * size))
    for col in range(n_cols):
        for row in range(images_per_row):
            # BUG FIX: .copy() — indexing a NumPy array returns a VIEW, so
            # without the copy the in-place normalization below silently
            # mutates `activations` itself.
            channel_image = layer_activation[0, :, :,
                                             col * images_per_row + row].copy()
            channel_image -= channel_image.mean()
            channel_image /= (channel_image.std() + 1e-5)  # Adding a small value to prevent division by zero
            channel_image *= 64
            channel_image += 128
            channel_image = np.clip(channel_image, 0, 255).astype('uint8')
            display_grid[col * size : (col + 1) * size,
                         row * size : (row + 1) * size] = channel_image
    scale = 1. / size
    plt.figure(figsize=(scale * display_grid.shape[1], scale * display_grid.shape[0]))
    plt.title(layer_name)
    plt.grid(False)
    plt.imshow(display_grid, aspect='auto', cmap='viridis')
    plt.show()